* the effect is cleared. (i.e., MOV-SS-blocking 'dominates' STI-blocking).
*/
-static void enable_irq_window(struct vcpu *v)
+static void enable_intr_window(struct vcpu *v, enum hvm_intack intr_source)
{
- u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
-
- if ( !(*cpu_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) )
+ u32 *cpu_exec_control = &v->arch.hvm_vcpu.u.vmx.exec_control;
+ u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;
+
+ if ( unlikely(intr_source == hvm_intack_none) )
+ return;
+
+ if ( unlikely(intr_source == hvm_intack_nmi) && cpu_has_vmx_vnmi )
{
- *cpu_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
+ /*
+ * We set MOV-SS blocking in lieu of STI blocking when delivering an
+ * NMI. This is because it is processor-specific whether STI-blocking
+ * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
+ * (otherwise vmentry will fail on processors that check for STI-
+ * blocking) but if the processor does not check for STI-blocking then
+ * we may immediately vmexit and hence make no progress!
+ * (see SDM 3B 21.3, "Other Causes of VM Exits").
+ */
+ u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
+ if ( intr_shadow & VMX_INTR_SHADOW_STI )
+ {
+ /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
+ intr_shadow &= ~VMX_INTR_SHADOW_STI;
+ intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
+ }
+ ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
+ }
+
+ if ( !(*cpu_exec_control & ctl) )
+ {
+ *cpu_exec_control |= ctl;
__vmwrite(CPU_BASED_VM_EXEC_CONTROL, *cpu_exec_control);
}
}
if ( unlikely(v->arch.hvm_vmx.vector_injected) )
{
v->arch.hvm_vmx.vector_injected = 0;
- if ( unlikely(intr_source != hvm_intack_none) )
- enable_irq_window(v);
+ enable_intr_window(v, intr_source);
return;
}
idtv_info_field = __vmread(IDT_VECTORING_INFO_FIELD);
if ( unlikely(idtv_info_field & INTR_INFO_VALID_MASK) )
{
- __vmwrite(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
+ /* See SDM 3B 25.7.1.1 and .2 for info about masking resvd bits. */
+ __vmwrite(VM_ENTRY_INTR_INFO_FIELD,
+ idtv_info_field & ~INTR_INFO_RESVD_BITS_MASK);
/*
* Safe: the length will only be interpreted for software
if ( unlikely(idtv_info_field & 0x800) ) /* valid error code */
__vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE,
__vmread(IDT_VECTORING_ERROR_CODE));
- if ( unlikely(intr_source != hvm_intack_none) )
- enable_irq_window(v);
+ enable_intr_window(v, intr_source);
+
+ /*
+ * Clear NMI-blocking interruptibility info if an NMI delivery
+ * faulted. Re-delivery will re-set it (see SDM 3B 25.7.1.2).
+ */
+ if ( (idtv_info_field&INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI )
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
+ __vmread(GUEST_INTERRUPTIBILITY_INFO) &
+ ~VMX_INTR_SHADOW_NMI);
HVM_DBG_LOG(DBG_LEVEL_1, "idtv_info_field=%x", idtv_info_field);
return;
if ( likely(intr_source == hvm_intack_none) )
return;
- /*
- * TODO: Better NMI handling. Shouldn't wait for EFLAGS.IF==1, but
- * should wait for exit from 'NMI blocking' window (NMI injection to
- * next IRET). This requires us to use the new 'virtual NMI' support.
- */
if ( !hvm_interrupts_enabled(v, intr_source) )
{
- enable_irq_window(v);
+ enable_intr_window(v, intr_source);
return;
}
} while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
ASSERT(v == current);
- intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
- intr_shadow &= VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS;
+ intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
if ( type == hvm_intack_nmi )
- return !intr_shadow;
+ return !(intr_shadow & (VMX_INTR_SHADOW_STI|
+ VMX_INTR_SHADOW_MOV_SS|
+ VMX_INTR_SHADOW_NMI));
ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
eflags = __vmread(GUEST_RFLAGS);
- return !irq_masked(eflags) && !intr_shadow;
+ return (!irq_masked(eflags) &&
+ !(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
}
static void vmx_update_host_cr3(struct vcpu *v)
vector = intr_info & INTR_INFO_VECTOR_MASK;
+ /*
+ * Re-set the NMI shadow if vmexit caused by a guest IRET fault (see 3B
+ * 25.7.1.2, "Resuming Guest Software after Handling an Exception").
+ * (NB. If we emulate this IRET for any reason, we should re-clear!)
+ */
+ if ( unlikely(intr_info & INTR_INFO_NMI_UNBLOCKED_BY_IRET) &&
+ !(__vmread(IDT_VECTORING_INFO_FIELD) & INTR_INFO_VALID_MASK) &&
+ (vector != TRAP_double_fault) )
+ __vmwrite(GUEST_INTERRUPTIBILITY_INFO,
+ __vmread(GUEST_INTERRUPTIBILITY_INFO)|VMX_INTR_SHADOW_NMI);
+
perfc_incra(cause_vector, vector);
switch ( vector )